home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
pcr
/
pcr4_4.lha
/
DIST
/
gc
/
GCOSDependent.c
< prev
next >
Wrap
C/C++ Source or Header
|
1991-10-15
|
18KB
|
623 lines
/* begincopyright
Copyright (c) 1988,1990 Xerox Corporation. All rights reserved.
Use and copying of this software and preparation of derivative works based
upon this software are permitted. Any distribution of this software or
derivative works must comply with all applicable United States export
control laws. This software is made available AS IS, and Xerox Corporation
makes no warranty about the software, its performance or its conformity to
any specification. Any person obtaining a copy of this software is requested
to send their name and post office or electronic mail address to:
PCR Coordinator
Xerox PARC
3333 Coyote Hill Rd.
Palo Alto, CA 94304
Parts of this software were derived from code bearing the copyright notice:
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
This material may be freely distributed, provided this notice is retained.
This material is provided as is, with no warranty expressed or implied.
Use at your own risk.
endcopyright */
/*
* OSDependent.c
*
* Extremely primitive memory allocation stuff.
*
* Weiser, 1988.
*
* Demers, February 27, 1990 8:36:25 am PST
* Boehm, December 13, 1990 6:23:34 pm PST
*/
#define START_PROC_HACK
#define DEBUG_PAR_GC
#undef DEBUG_PAR_GC
#define DEBUG_SCHEDULING
#undef DEBUG_SCHEDULING
#include <sys/time.h>
#include "xr/GCPrivate.h"
#include "xr/Threads.h"
#include "xr/ThreadsBackdoor.h"
#include "xr/ThreadsSchedCtl.h"
#include "xr/ThreadsMsg.h"
#define I_HOLD_ML(ml) (((XR_ML)(ml))->ml_holder == XR_currThread)
/* 256MBytes max heap size */
#define MAXHEAPSIZE (1024*1024*256)
/* must be multiple of HBLKSIZE and a multiple of system pagesize */
#define INITIAL_HEAP 8*HBLKSIZE
/*
* Setup -- called before VPs are forked
*/
/*
 * Register a shared segment's address range with the collector's
 * static data list, so it will be scanned for roots.  Empty segments
 * are ignored.
 */
static void
XR_GCAddSharedSeg(seg)
XR_Seg seg;
{
    if( seg->seg_bytes <= 0 ) return;
    XR_add_data_list( seg->seg_addr, seg->seg_addr + seg->seg_bytes );
}
/* Mesa procs for parallel garbage collector */
static XR_MesaProc KeepAwakeProc;
static XR_MesaProc GC_daemon_proc;
/*
 * XR_SetupGC -- one-time collector initialization; called before any
 * VPs are forked.
 *
 * Establishes the mark-stack bounds, initializes the allocation
 * monitors, sanity-checks compile-time size assumptions, registers
 * the statically allocated shared segments as root sources, creates
 * the initial heap, builds the Mesa procs used by the parallel
 * collector, and registers the GC debugger commands.
 *
 * Fix: removed the local "struct hblk *thishbp;", which was declared
 * but never used in this function.
 */
void
XR_SetupGC()
{
    word dummy; /* something in my frame to take the address of */

    /* WARNING: the following works only for machines */
    /* whose stacks grow toward smaller numbered addrs, */
    /* and only for operating systems that grow the */
    /* stack segment even if the stack pointer is not */
    /* "reasonable". Undoubtedly there are many other */
    /* restrictions. */
    GC_mark_stack_bottom = &dummy;
    GC_mark_stack_bottom -= STACKGAP;
    GC_mark_stack_top = GC_mark_stack_bottom;

    XR_InitializeMonitor(&GC_allocate_ml);
    XR_InitializeMonitor(&GC_alloc_callback_ml);

    /* Compile-time assumptions the collector depends on. */
    if (sizeof (struct hblk) != HBLKSIZE) {
        GC_abort("HBLK size inconsistency");
    }
    if (sizeof (long) != sizeof (int)) {
        GC_iprintf("Long and int are not the same. GC will probably break.\n");
    }
    if (sizeof (long) != sizeof (bool)) {
        GC_iprintf(
            "Long and bool are not the same. GC will probably break.\n");
    }
#   ifdef ALIGN_DOUBLE
        if (HDR_BYTES % 8 != 0) {
            GC_abort("Improper header size");
        }
#   endif
#   ifdef MERGE_SIZES
        GC_init_size_map();
#   endif

    /* Register statically allocated shared segments as root sources. */
    XR_GCAddSharedSeg( &(XR_sysArea->sa_sharedDataSeg) );
#   ifdef UNDEFINED
        XR_GCAddSharedSeg( &(XR_sysArea->sa_sharedCommonSeg) );
        XR_GCAddSharedSeg( &(XR_sysArea->sa_sharedBSSSeg) );
#   endif

    /* The heap must start on a heap-block boundary and be empty. */
    GC_heapstart = ((char *)(XR_sysArea->sa_heapSeg.seg_addr));
    if (GC_heapstart != (char *) HBLKPTR(GC_heapstart+HBLKSIZE-1)) {
        GC_abort("Heapstart not properly aligned");
    }
    if (XR_sysArea->sa_heapSeg.seg_bytes != 0) {
        GC_abort("Nonempty heap segment at startup");
    }
    GC_heaplim = GC_heapstart;

    /* Set up initial heap. */
    if (!GC_expand_hp(divHBLKSZ(INITIAL_HEAP + HBLKSIZE-1), TRUE)) {
        GC_abort("Cant create initial heap");
    }
    GC_register_displacement(0); /* Pointers to the beginning of an */
                                 /* object are always OK. */
    GC_register_displacement(8); /* PCedar, among others, uses 8 byte */
                                 /* object headers. */

    GC_daemon_proc = XR_MakeMesaProc(GC_daemon,0);
    GC_collector_setup();
    {
        static XR_Pointer GC_KeepAwake();
        KeepAwakeProc = XR_MakeMesaProc(GC_KeepAwake, 0);
    }
    GC_RegisterCommands();
}
/* ??? delete the following someday ... ??? */
#ifndef XR_PRI_SYS_EXCLUSIVE
# define XR_PRI_SYS_EXCLUSIVE XR_PRI_LAST
#endif
/* Priority of "world stopped" collection */
static XR_Pri gcExclusivePriority = XR_PRI_SYS_EXCLUSIVE;
/* Priority of background GC keep-awake thread */
static XR_Pri gcKeepAwakePriority = XR_PRI_SYS_FOREGROUND;
struct XR_CTRep GC_last_marker; /* Last started parallel collection thread. */
# ifdef DEBUG_PAR_GC
struct XR_CTRep GC_last_forker; /* parent of GC_last_marker */
int GC_last_gc_no;
# endif
# define WAKEUP_TICKS ((XR_Ticks)2)
# define MAXFRAMES 10
typedef unsigned int stack_type[MAXFRAMES];
# define NONE -1
# ifdef DEBUG_SCHEDULING
/* Trace back through the stack starting at sp and record the saved */
/* pc of every frame in *stack; unused slots are padded with NONE.  */
/* NOTE(review): the [15]/[14] word offsets look like a SPARC-style */
/* frame layout (saved pc / saved sp) -- confirm for other targets. */
void save_stack(sp,stack)
register unsigned sp;
register stack_type * stack;
{
    register int n = 0;
    register unsigned next;
#   define MAXDIF 64*1024

    /* Only walk stacks that lie in a plausible address range. */
    if (sp >= (unsigned)GC_heaplim
        && sp <= (unsigned)GC_heaplim + 0x10000000) {
        while (n < MAXFRAMES) {
            (*stack)[n++] = ((unsigned *)sp)[15];
            next = ((unsigned *)sp)[14];
            /* Frames must move upward by a bounded amount. */
            if (next < sp || next > sp + MAXDIF) break;
            sp = next;
        }
    }
    /* Pad the remainder with the NONE sentinel. */
    while (n < MAXFRAMES) {
        (*stack)[n++] = NONE;
    }
}
/* Print the pc values recorded in *s1 on the console, stopping at */
/* the first NONE sentinel.                                        */
void print_stack(s1)
register stack_type *s1;
{
    register int j = 0;

    while (j < MAXFRAMES && (*s1)[j] != NONE) {
        XR_ConsoleMsg("0x%X, ", (*s1)[j]);
        j++;
    }
    XR_ConsoleMsg(" ...\n");
}
stack_type collector_stack;
# endif DEBUG_SCHEDULING
struct XR_CVRep GC_parallel_done_cv = { 0 };
bool GC_parallel_done_signalled = FALSE;
/* Signal completion of a parallel marker. Not essential for correctness, */
/* but avoids an unnecessary wait for a timeout. */
/* A superfluous call WILL break things -- presumably by making an active */
/* GC_donate_cycles loop (which resets and tests this flag) return while  */
/* its marker is still running; confirm before calling from new code.     */
void GC_parallel_done()
{
/* Set the flag before notifying, so a waiter that wakes up sees it. */
GC_parallel_done_signalled = TRUE;
XR_Notify(&GC_parallel_done_cv);
}
/* Repeatedly do a directed yield to thread t, with the fraction of */
/* time allocated to t increasing quadratically in the ratio */
/* GC_words_allocd*GC_free_mem_ratio/GC_composite_in_use. */
/* Returns when t announces completion via GC_parallel_done() or its */
/* thread handle becomes invalid. */
void GC_donate_cycles(t)
XR_CT t;
{
unsigned long r;
unsigned long r2;
XR_Ticks ticks;
int count;
register int i;
register int c_in_use = GC_composite_in_use;
/* Clamp the divisor so a nearly-empty heap doesn't make r explode. */
if ( c_in_use < MIN_COMPOSITE_IN_USE ) {
c_in_use = MIN_COMPOSITE_IN_USE;
}
GC_parallel_done_signalled = FALSE;
for (;;) {
/* r is 10x the allocation ratio described above; r2 is its square */
/* (scaled back down), used to derive yields-per-wakeup.           */
r = 10 * GC_words_allocd * GC_free_mem_ratio
/ c_in_use;
r2 = r * r / 100;
if (r2 == 0) r2 = 1;
ticks = WAKEUP_TICKS;
count = 1;
if (r2 > ticks) {
/* NOTE(review): ticks is set to 1 before the division, so     */
/* count == r2 here; if the intent was "r2 yields per          */
/* WAKEUP_TICKS", the division should use the old ticks value  */
/* -- confirm against the scheduler's tick granularity.        */
ticks = 1;
count = r2/ticks;
} else {
ticks /= r2;
count = 1;
}
/* Sleep until the timeout expires or GC_parallel_done() notifies. */
XR_SetTimeout(&GC_parallel_done_cv, ticks);
XR_WaitCV(&GC_parallel_done_cv, NIL);
if (count > 10) {
/* The marker needs far more yields than one pass can give it. */
XR_LogVMsg
"%? Collector fell behind, count = %d, pri = %d, ticks = %d\n",
count, t -> ct_thread -> t_pri, ticks);
# ifdef DEBUG_SCHEDULING
/* NOTE(review): save_stack/print_stack declare a stack_type *  */
/* parameter; passing collector_stack relies on the array       */
/* decaying to the same address (pre-ANSI C, unchecked).        */
save_stack(t -> ct_thread -> t_resume.jb_data[2],
collector_stack);
XR_ConsoleMsg("State = %d, pc = 0x%X, stack:\n",
t -> ct_thread -> t_sStat,
t -> ct_thread -> t_resume.jb_data[0]);
print_stack(collector_stack);
# endif
}
if (count > 4) count = 4;
for (i = 0; i < count; i++) {
/* Stop once the marker is done or its thread has disappeared. */
if (GC_parallel_done_signalled || XR_ValidateCT(t) < 0) {
return;
}
if (t -> ct_thread -> t_sStat != XR_SSTAT_READY) break;
XR_Switch(t -> ct_thread, XR_SSTAT_READY, XR_SWSTAT_THREAD);
}
}
}
/* High priority process to wake up background gc on a regular basis. */
static XR_Pointer GC_KeepAwake()
{
struct XR_CTRep last_marker;
last_marker = GC_last_marker;
XR_SetPriority(gcKeepAwakePriority);
GC_donate_cycles(&last_marker);
return(0);
}
/* Start up the garbage collection daemon and switch to parallel and */
/* incremental collection. */
/* Idempotent if called only under GC_allocate_ml. */
bool GC_daemon_started = FALSE;
void GC_start_daemon()
{
struct XR_CTRep daemon_thread;
if (GC_daemon_started) return;
GC_daemon_started = TRUE;
XR_InitializeCondition(&GC_parallel_done_cv, XR_WAIT_FOREVER);
if (XR_TryFork (&daemon_thread, GC_daemon_proc) < 0) {
GC_printf("Cant fork garbage collection daemon - no threads\n");
}
}
/* Start up a partially parallel marker or collection. */
/* If wait is TRUE, then do not return until the mark */
/* or collection process mp has finished. In this */
/* case, also assume that we are already running at */
/* the correct priority. */
/* No more than one of these should run at once. */
void
GC_RunParallel( mp, wait )
XR_MesaProc mp;
bool wait;
{
struct XR_CTRep mark_thread;
struct XR_CTRep keep_awake_thread;
/* Clear this thread's startProc around the fork -- presumably so the */
/* child thread does not inherit it; restore it on every exit path.   */
/* TODO(review): confirm against XR_TryFork's startProc semantics.    */
# ifdef START_PROC_HACK
XR_MesaProc old_proc = XR_currThread->t_startProc;
XR_currThread->t_startProc = 0;
# endif
if (XR_TryFork (&mark_thread, mp) < 0) {
/* No thread available: run the marker synchronously in this thread. */
GC_printf("Cant fork collector - no threads\n");
(*(mp -> mp_proc))(mp);
# ifdef START_PROC_HACK
XR_currThread->t_startProc = old_proc;
# endif
return;
} else {
if (XR_DetachCT(&mark_thread) < 0) {
GC_printf("Couldn't detach collector thread\n");
}
/* Remember the marker so GC_KeepAwake can find it later. */
GC_last_marker = mark_thread;
}
# ifdef START_PROC_HACK
XR_currThread->t_startProc = old_proc;
# endif
/* Start a high priority process to donate cycles to the low priority */
/* marker process. */
if (wait) {
/* Synchronous case: donate cycles ourselves until the marker ends. */
GC_donate_cycles(&mark_thread);
} else {
if (XR_TryFork (&keep_awake_thread, KeepAwakeProc) < 0) {
GC_printf("Cant fork keep awake thread - no threads\n");
} else {
if (XR_DetachCT(&keep_awake_thread) < 0) {
GC_printf("Couldn't detach keep awake thread\n");
}
}
}
# ifdef DEBUG_PAR_GC
GC_last_gc_no = GC_gc_no;
XR_GetCurrent(&GC_last_forker);
# endif DEBUG_PAR_GC
}
# ifdef PRINTTIMES
int GC_started_stats = 0; /* Started statistics at GC number ... */
int GC_total_pause_time = 0; /* Total pause time since start of stats */
int GC_max_pause_time = 0; /* Max pause time since start of stats */
/* Times are in milliseconds. */
/* Restart pause-time statistics as of the current collection number. */
void GC_reset_stats() {
    GC_started_stats = GC_gc_no;
    GC_max_pause_time = 0;
    GC_total_pause_time = 0;
}
/* Report collections since the last reset, with the rounded average */
/* and the maximum pause time, in milliseconds.                      */
void GC_print_stats() {
    register int count = GC_gc_no - GC_started_stats;

    if (count != 0) {
        XR_PrintF("%d collections: Ave, max pause: %d, %d msecs\n",
            count,
            (GC_total_pause_time + (count >> 1))/count, /* rounded avg */
            GC_max_pause_time);
    } else {
        XR_PrintF("No collections\n");
    }
}
# endif
/* Run proc with all threads running at less than max priority stopped. */
/* The caller should be running on vp 0, and will continue to do */
/* so. */
/* Sequence: raise our priority to the exclusive level, take the locks */
/* that cannot be acquired with the world stopped, detach all other    */
/* threads from their processors, run proc, then undo everything in    */
/* reverse order.  Any scheduler-control failure panics via Bad:.      */
void
GC_RunExclusive( proc, clientData )
void (*proc)(/* XR_Pointer data */);
XR_Pointer clientData;
{
int ans, err;
XR_Pri oldPri;
bool Oexclusive = GC_running_exclusive;
unsigned long anyProcessor = scop_processorArgAny;
unsigned long noProcessor = scop_processorArgNone;
unsigned long processor0 = scop_processorArgVP(0);
# ifdef PRINTTIMES
struct timeval start_time;
# ifdef UNDEFINED
struct timeval interm_time1;
struct timeval interm_time2;
# endif
struct timeval end_time;
int stop_time; /* Time during which world was stopped, in msecs */
# ifdef UNDEFINED
int time1;
int time2;
# endif
# endif
/* Record which step failed, then jump to the common failure exit. */
# define CHECKANS(i) if(ans < 0) { err = i; goto Bad; }
oldPri = XR_GetPriority();
ans = XR_SchedCtl(
XR_SchedCtlWhichSelf(),
scop_setPriority,
((unsigned long *)(&gcExclusivePriority))
);
CHECKANS(0);
XR_AcquireVDWriteLock(); /* We will need to update dirty bits. */
/* We can't acquire locks with the */
/* world stopped. */
XR_AcquireIOPOrderLocks(); /* Acquire in case we have to issue an */
/* IOP order, e.g. to protect memory. */
ans = XR_SchedCtlLock(TRUE);
CHECKANS(1);
# ifdef PRINTTIMES
if (gettimeofday(&start_time,0) < 0) {
XR_ConsoleMsg("gettimeofday failed\n");
}
# endif
/* Stop the world: detach every other thread from its processor. */
ans = XR_SchedCtlAll(
/*othersOp*/ scop_setProcessor,
/*othersArgp*/ &noProcessor,
/*myOp*/ scop_nop,
/*myArgp*/ &processor0
);
CHECKANS(2);
ans = XR_SchedCtlWait();
CHECKANS(3);
if( proc != NIL ) {
# ifdef UNDEFINED
if (gettimeofday(&interm_time1,0) < 0) {
XR_ConsoleMsg("gettimeofday failed\n");
}
# endif
XR_Yield(); /* make sure stack is in mem, the SP value */
/* known to the scheduler is up-to-date. */
/* This isn't really necessary, since we are */
/* only called from the GC daemon. That's a */
/* a good thing, since any registers saved by */
/* XR_Yield, etc., won't be seen by the */
/* collector. */
/* Save and set the exclusive flag; restored after proc returns. */
Oexclusive = GC_running_exclusive;
GC_running_exclusive = TRUE;
if (XR_vpe -> vpe_index != 0) {
XR_Panic("GC_RunExclusive: Not on vp 0");
}
(*proc)(clientData);
GC_running_exclusive = Oexclusive;
}
# ifdef UNDEFINED
if (gettimeofday(&interm_time2,0) < 0) {
XR_ConsoleMsg("gettimeofday failed\n");
}
# endif
/* Restart the world: let other threads run on any processor again. */
ans = XR_SchedCtlAll(
scop_setProcessor,
&anyProcessor,
scop_nop,
&processor0
);
CHECKANS(4);
ans = XR_SchedCtlUnlock();
CHECKANS(5);
XR_ReleaseVDWriteLock();
XR_ReleaseIOPOrderLocks();
XR_SetPriority(oldPri);
# ifdef PRINTTIMES
if (gettimeofday(&end_time,0) < 0) {
XR_ConsoleMsg("gettimeofday failed\n");
}
stop_time = 1000*(end_time.tv_sec - start_time.tv_sec);
stop_time += (end_time.tv_usec - start_time.tv_usec)/1000;
/* NOTE(review): XR_LogVMsg apparently expands to include the opening */
/* parenthesis of the call; the bare ')' below closes it -- confirm.  */
XR_LogVMsg "Stopped world for %d msecs\n",
stop_time);
GC_total_pause_time += stop_time;
if (stop_time > GC_max_pause_time) GC_max_pause_time = stop_time;
# ifdef UNDEFINED
time1 = 1000*(interm_time1.tv_sec - start_time.tv_sec);
time1 += (interm_time1.tv_usec - start_time.tv_usec)/1000;
XR_LogVMsg "First intermediate time at %d msecs\n", time1);
time2 = 1000*(interm_time2.tv_sec - start_time.tv_sec);
time2 += (interm_time2.tv_usec - start_time.tv_usec)/1000;
XR_LogVMsg "Second intermediate time at %d msecs\n", time2);
# endif
# endif
return;
Bad:
XR_ConsoleMsg("%? schedctl ans %d err %d\n", ans, err);
XR_Panic("GCRunExclusive");
# undef CHECKANS
}
/* Return a new block of memory, extending the shared heap segment by */
/* size bytes (rounded up to a whole page).                           */
/* Must be called with GC_allocate_ml held (panics otherwise).        */
/* Returns (struct hblk *)0 on failure: heap limit reached, or the    */
/* new range could not be mapped.                                     */
/* Fix: removed the local "got_requested_size", which was set but     */
/* never read.                                                        */
struct hblk *
GC_get_sys_mem(size)
unsigned long size; /* increment, really! */
{
    struct hblk *thishbp;
    struct XR_SegRep tSeg;

    if( ! I_HOLD_ML(&(GC_allocate_ml)) )
        XR_Panic("GC_get_sys_mem 0");
    size = XR_RoundToPage(size, XR_ROUND_UP);
    /* Refuse to grow past the configured heap limit. */
    if( (((XR_Pointer)(GC_sys_mem_end)) + size) > XR_sysArea->sa_heapLimit ) {
        return ((struct hblk *)0);
    }
    /* The new block begins just past the current heap segment. */
    thishbp = (struct hblk *)
        (XR_sysArea->sa_heapSeg.seg_addr+XR_sysArea->sa_heapSeg.seg_bytes);
    XR_InitSeg2( &tSeg, ((XR_Pointer)(thishbp)), size );
    /* Map the new range before extending the recorded segment, so the */
    /* segment descriptor never covers unmapped memory.                */
    if( XR_MapSharedHeapSeg(&tSeg) < 0 ) return ((struct hblk *)0);
    XR_ExtendSeg( &(XR_sysArea->sa_heapSeg), size);
    XR_sysArea->sa_heapNum += 1;
#   ifdef PRINTSTATS
        GC_vprintf( "Adding to heap with %d shared bytes at addr 0x%X.\n",
                    size, thishbp );
#   endif
    return (thishbp); /* success */
}
/* The analogous routine, but intended to be called during the startup */
/* process. May only be called once.                                   */
/* Returns (struct hblk *)0 on failure: heap limit reached, or the     */
/* segment could not be mapped.                                        */
/* Fix: the return value of XR_MapSharedHeapSeg was previously         */
/* ignored; it is now checked, as in GC_get_sys_mem.                   */
struct hblk *
GC_get_sys_mem_initial(size)
unsigned long size;
{
    struct hblk * thishbp;

    size = XR_RoundToPage(size, XR_ROUND_UP);
    if( ((XR_sysArea->sa_heapSeg.seg_addr) + size)
            > XR_sysArea->sa_heapLimit ) {
        return ((struct hblk *)0);
    }
    thishbp = (struct hblk *) (XR_sysArea->sa_heapSeg.seg_addr);
    /* The segment must cover the new range before it can be mapped. */
    XR_ExtendSeg( &(XR_sysArea->sa_heapSeg), size);
    if( XR_MapSharedHeapSeg( &(XR_sysArea->sa_heapSeg) ) < 0 ) {
        return ((struct hblk *)0);
    }
    XR_sysArea->sa_heapNum += 1;
#   ifdef PRINTSTATS
        GC_vprintf("Startup: Adding to heap with %d shared bytes at addr 0x%X.\n",
                   size, thishbp );
#   endif
    return (thishbp); /* success */
}
/* Grow the heap by at least size bytes, rounded up to whole heap     */
/* blocks.  The caller must NOT already hold GC_allocate_ml (panics   */
/* otherwise); it is acquired and released here.                      */
bool
XR_Increase_Heap(size)
unsigned size; /* increment */
{
    bool grew;

    if( I_HOLD_ML(&GC_allocate_ml) )
        XR_Panic("Increase_Heap 0");
    XR_MonitorEntry( &GC_allocate_ml );
    grew = GC_expand_hp(divHBLKSZ(size + HBLKSIZE - 1), FALSE);
    XR_MonitorExit( &GC_allocate_ml );
    return grew;
}
/* Lock this thread to vp 0, so that UNIX stack can be relied upon,  */
/* and so that a directed yield to another thread that has executed  */
/* this call is reliable.                                            */
/* This stays in effect until thread termination.                    */
void
GC_lock_to_fixed_vp ()
{
    unsigned long vp0 = scop_processorArgVP(0);

    if (XR_SchedCtl(
            XR_SchedCtlWhichSelf(),
            scop_setProcessor,
            &vp0
        ) < 0) {
        GC_abort("GC_lock_to_fixed_vp 0");
    }
    /* Wait for the scheduler to apply the binding. */
    if (XR_SchedCtlWait() < 0) {
        GC_abort("GC_lock_to_fixed_vp 1");
    }
}